#include <xen/spinlock.h>
#include <xen/slab.h>
#include <xen/irq.h>
+ #include <xen/softirq.h>
#include <asm/domain_page.h>
+#include <asm/page.h>
/*
* Comma-separated list of hexadecimal page numbers containing bad bytes.
void free_domheap_pages(struct pfn_info *pg, unsigned int order)
{
int i, drop_dom_ref;
- struct domain *d = pg->u.inuse.domain;
+ struct domain *d = page_get_owner(pg);
+ struct exec_domain *ed;
- void *p;
+ int cpu_mask = 0;
ASSERT(!in_irq());
{
ASSERT((pg[i].u.inuse.type_info & PGT_count_mask) == 0);
pg[i].tlbflush_timestamp = tlbflush_current_time();
- pg[i].u.free.cpu_mask = 1 << d->processor;
+ pg[i].u.free.cpu_mask = cpu_mask;
list_del(&pg[i].list);
+ }
+
+ d->tot_pages -= 1 << order;
+ drop_dom_ref = (d->tot_pages == 0);
+
+ spin_unlock_recursive(&d->page_alloc_lock);
- if ( likely(!test_bit(DF_DYING, &d->flags)) )
++ if ( likely(!test_bit(DF_DYING, &d->d_flags)) )
+ {
+ free_heap_pages(MEMZONE_DOM, pg, order);
+ }
+ else
+ {
/*
* Normally we expect a domain to clear pages before freeing them,
* if it cares about the secrecy of their contents. However, after
return avail[MEMZONE_DOM];
}
+
+
+ /*************************
+ * PAGE SCRUBBING
+ */
+
+ /* Serializes all access to page_scrub_list. */
+ static spinlock_t page_scrub_lock;
+ /* Doubly-linked list of pages queued for scrubbing (zeroing) before reuse. */
+ struct list_head page_scrub_list;
+
+ /*
+  * Softirq handler: drain page_scrub_list, zeroing each queued page and
+  * returning it to the MEMZONE_DOM heap. Work is batched (up to 16 pages
+  * per lock hold) and time-bounded to roughly 1ms per invocation so the
+  * softirq does not monopolize the CPU.
+  */
+ static void page_scrub_softirq(void)
+ {
+ struct list_head *ent;
+ struct pfn_info *pg;
+ void *p;
+ int i;
+ s_time_t start = NOW();
+
+ /* Aim to do 1ms of work (ten percent of a 10ms jiffy). */
+ do {
+ spin_lock(&page_scrub_lock);
+
+ /* List empty: nothing to scrub, so bail out immediately. */
+ if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
+ {
+ spin_unlock(&page_scrub_lock);
+ return;
+ }
+
+ /* Peel up to 16 pages from the list. */
+ for ( i = 0; i < 16; i++ )
+ if ( (ent = ent->next) == &page_scrub_list )
+ break;
+
+ /* Remove peeled pages from the list. */
+ ent->next->prev = &page_scrub_list;
+ page_scrub_list.next = ent->next;
+
+ /* Drop the lock before scrubbing: map/clear/unmap is the slow part. */
+ spin_unlock(&page_scrub_lock);
+
+ /* Working backwards, scrub each page in turn. */
+ while ( ent != &page_scrub_list )
+ {
+ pg = list_entry(ent, struct pfn_info, list);
+ ent = ent->prev;
+ /* Map the frame, zero it, unmap, then free it back to the heap. */
+ p = map_domain_mem(page_to_phys(pg));
+ clear_page(p);
+ unmap_domain_mem(p);
+ free_heap_pages(MEMZONE_DOM, pg, 0);
+ }
+ } while ( (NOW() - start) < MILLISECS(1) );
+ }
+
+ /*
+  * Boot-time initialization: set up the scrub lock and list, and register
+  * the PAGE_SCRUB_SOFTIRQ handler. Invoked via the __initcall mechanism.
+  */
+ static __init int page_scrub_init(void)
+ {
+ spin_lock_init(&page_scrub_lock);
+ INIT_LIST_HEAD(&page_scrub_list);
+ open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
+ return 0;
+ }
+ __initcall(page_scrub_init);
++
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
TRACE_0D(TRC_SCHED_T_TIMER_FN);
- if ( !is_idle_task(d) && update_dom_time(d) )
- send_guest_virq(d, VIRQ_TIMER);
+ if ( !is_idle_task(ed->domain) && update_dom_time(ed) )
+ send_guest_virq(ed, VIRQ_TIMER);
- t_timer[d->processor].expires = NOW() + MILLISECS(10);
- add_ac_timer(&t_timer[d->processor]);
+ page_scrub_schedule_work();
+
+ t_timer[ed->processor].expires = NOW() + MILLISECS(10);
+ add_ac_timer(&t_timer[ed->processor]);
}
/* Domain timer function, sends a virtual timer interrupt to domain */